import matplotlib.pyplot as plt
from skimage import io
from matplotlib import cm
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import os, PIL, pathlib, math
from imgaug import augmenters as iaa
from PIL import Image
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
%matplotlib inline
# Synchronous data-parallel training: variables are mirrored across all
# visible local GPUs (the log below reports 4 replicas).
strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
def show_grid(image_list, nrows, ncols, label_list=None, show_labels=False,
              savename=None, figsize=(10, 10), showaxis='off'):
    """Display a grid of images with optional per-image class-name titles.

    Args:
        image_list: either a list of 2-D/3-D arrays, or a 4-D batch array of
            shape (N, H, W, 1) or (N, H, W, 3), which is split into a list.
            Pixel values are assumed to be in [0, 1] (they are scaled by 255
            for display) — TODO confirm against the rescale=1./255 generator.
        nrows, ncols: grid dimensions; the first nrows*ncols images are shown.
        label_list: integer class labels, indexed in step with image_list.
        show_labels: when True, title each cell via the module-level
            class_mapping dict.
        savename: optional path; when given the figure is also saved.
        figsize: matplotlib figure size in inches.
        showaxis: unused (axes are always turned off) — kept for
            backward compatibility.
    """
    if not isinstance(image_list, list):
        # Split a batched array into a list of single images.
        if image_list.shape[-1] == 1:
            image_list = [image_list[i, :, :, 0] for i in range(image_list.shape[0])]
        elif image_list.shape[-1] == 3:
            image_list = [image_list[i, :, :, :] for i in range(image_list.shape[0])]
    fig = plt.figure(None, figsize, frameon=False)
    grid = ImageGrid(fig, 111,              # similar to subplot(111)
                     nrows_ncols=(nrows, ncols),
                     axes_pad=0.3,          # pad between axes in inch
                     share_all=True,
                     )
    for i in range(nrows * ncols):
        ax = grid[i]  # the AxesGrid object works as a list of axes
        ax.imshow((image_list[i] * 255).astype(np.uint8), cmap='Greys_r')
        ax.axis('off')
        if show_labels:
            # Bug fix: honour the label_list argument (previously ignored in
            # favour of the global y_int). Falls back to the global when the
            # caller passes no labels, preserving old behaviour.
            labels = label_list if label_list is not None else y_int
            ax.set_title(class_mapping[labels[i]])
    if savename is not None:
        plt.savefig(savename, bbox_inches='tight')
# Defining Image augmentors
# In-model augmentation pipeline (runs on GPU, training mode only):
# random rotation of up to +/-20% of a full turn, then a random shift of up
# to 20% of height/width with reflected borders and bilinear resampling.
_random_rotate = layers.experimental.preprocessing.RandomRotation(0.2)
_random_shift = layers.experimental.preprocessing.RandomTranslation(
    0.2, 0.2, fill_mode='reflect', interpolation='bilinear')
data_augmentation = keras.Sequential([_random_rotate, _random_shift])
# imgaug augmenters kept for experimentation; none are currently active —
# every call in add_aug below is commented out.
aug3 = iaa.MotionBlur(k=5, angle=[-90, 90])  # 5px motion blur, near-vertical angle
aug4 = iaa.CoarseSaltAndPepper((0,0.02), size_px=(2, 8))  # blocky salt & pepper noise
aug5 = iaa.AdditiveGaussianNoise(scale=(0, 0.05*255))  # gaussian pixel noise, up to 5% of 255
aug6 = iaa.Sharpen(alpha=(0,0.05), lightness=1.0)  # very mild sharpening
aug7 = iaa.Dropout(p=(0, 0.05))  # drop up to 5% of pixels
aug8 = iaa.LinearContrast((0.9, 1.0))  # slight contrast reduction
def add_aug(image):
    """Per-image preprocessing hook for ImageDataGenerator.

    Currently an identity function: all imgaug augmenters (aug3..aug8) are
    deliberately disabled. Uncomment a line below to re-enable that step.
    """
    # image = aug3.augment_image(image)   # motion blur
    # image = aug4.augment_image(image)   # coarse salt & pepper
    # image = aug5.augment_image(image)   # additive gaussian noise
    # image = aug6.augment_image(image)   # sharpen
    # image = aug7.augment_image(image)   # pixel dropout
    # image = aug8.augment_image(image)   # linear contrast
    return image
# Defining the three models in functions
def simpleModel():
    """Build and compile a small 3-block CNN baseline.

    Three Conv2D+MaxPool blocks (16/32/64 filters), dropout, and a single
    dense hidden layer; the final Dense emits raw logits over num_classes
    (loss uses from_logits=True). Input rescaling is handled upstream by
    the ImageDataGenerator, not inside the model.
    """
    conv_stack = []
    for n_filters in (16, 32, 64):
        conv_stack.append(layers.Conv2D(n_filters, 3, padding='same',
                                        activation='relu'))
        conv_stack.append(layers.MaxPooling2D())

    head = [
        layers.Dropout(0.2),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dense(num_classes),  # logits
    ]

    model = Sequential([data_augmentation] + conv_stack + head)
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model
def AlexNet():
    """Build and compile an AlexNet-style CNN.

    Five convolutional layers (each followed by batch norm), three 3x2
    max-pools, and two 4096-unit dense layers with 0.5 dropout. The final
    Dense emits logits; optimizer is Nesterov SGD (lr=0.001, momentum=0.01).
    Tuned for: batch 32, 224px inputs (per the original author's note).
    """
    def _conv_bn(filters, kernel, strides=1, padding='same'):
        # One conv layer plus its batch-normalization.
        return [
            layers.Conv2D(filters, kernel, strides=strides, padding=padding,
                          activation='relu'),
            layers.BatchNormalization(),
        ]

    def _pool():
        return layers.MaxPooling2D(pool_size=3, strides=2)

    stack = [data_augmentation]
    stack += _conv_bn(96, 11, strides=4, padding='valid')
    stack.append(_pool())
    stack += _conv_bn(256, 5)
    stack.append(_pool())
    stack += _conv_bn(384, 3)
    stack += _conv_bn(384, 3)
    stack += _conv_bn(256, 3)
    stack.append(_pool())
    stack += [
        layers.Flatten(),
        layers.Dense(4096, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(4096, activation='relu'),
        layers.Dropout(0.5),
        layers.Dense(num_classes),  # logits
    ]

    model = Sequential(stack)
    sgd = tf.keras.optimizers.SGD(learning_rate=0.001, momentum=0.01,
                                  nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model
def VGG16():
    """Build and compile a VGG16 network.

    Standard VGG16 layout: five conv stages (2,2,3,3,3 conv layers of
    64/128/256/256/512... filters, all 3x3 stride-1 'same'), each stage
    closed by a 2x2 max-pool, then two 4096-unit dense layers and a logits
    head. Optimizer is Nesterov SGD (lr=0.02, momentum=0.2).
    Tuned for: batch 64, 224px inputs (per the original author's note).
    """
    # (number of conv layers, filters) per stage.
    stages = ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512))

    stack = [data_augmentation]
    for n_convs, filters in stages:
        stack.extend(
            layers.Conv2D(filters, 3, strides=1, padding='same',
                          activation='relu')
            for _ in range(n_convs))
        # Pool stride defaults to pool_size, i.e. 2, as VGG requires.
        stack.append(layers.MaxPooling2D(pool_size=2))

    stack += [
        layers.Flatten(),
        layers.Dense(4096, activation='relu'),
        layers.Dense(4096, activation='relu'),
        layers.Dense(num_classes),  # logits
    ]

    model = Sequential(stack)
    sgd = tf.keras.optimizers.SGD(learning_rate=0.02, momentum=0.2,
                                  nesterov=True)
    model.compile(
        optimizer=sgd,
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])
    return model
# ----- Hyper-parameters and dataset pipeline -----
num_classes = 7
batch_size = 64
img_height = 224 # 180, 227, 224
img_width = 224 # 180, 227, 224
data_dir = pathlib.Path('takaFrames_dataset/')
#image_count = len(list(data_dir.glob('*/*.jpg')))
#print(image_count)
print("\n")
# One generator serves both splits: rescale pixels to [0, 1], hold out 10%
# for validation, and run the (currently no-op) add_aug hook per image.
datagen_args = dict(rescale=1./255, validation_split=0.1, preprocessing_function=add_aug)
datagen = ImageDataGenerator(**datagen_args)
# class_mode defaults to 'categorical', so labels arrive one-hot — matching
# the CategoricalCrossentropy loss used by the models above.
train_ds = datagen.flow_from_directory(
data_dir,
subset="training",
target_size=(img_height, img_width),
batch_size=batch_size)
print("\n")
val_ds = datagen.flow_from_directory(
data_dir,
subset="validation",
target_size=(img_height, img_width),
batch_size=batch_size)
print("\n")
print(train_ds.class_indices)
# Invert class_indices so integer labels map back to class names for display.
class_mapping = {v:k for k,v in train_ds.class_indices.items()}
# Pull one batch to sanity-check shapes: x is (batch, H, W, 3), y is one-hot.
x,y = next(train_ds)
print('x: ',type(x))
print('y: ',type(y))
print('x: ',x.shape)
print('y: ',y.shape)
print("\n")
y_int = np.argmax(y,axis=-1)  # integer class labels for the grid titles
show_grid(x,4,8,label_list=y_int,show_labels=True,figsize=(20,10))
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1', '/job:localhost/replica:0/task:0/device:GPU:2', '/job:localhost/replica:0/task:0/device:GPU:3')
Number of devices: 4
Found 43694 images belonging to 7 classes.
Found 4851 images belonging to 7 classes.
{'Children': 0, 'Cosmetics': 1, 'FashionAccessories': 2, 'Household': 3, 'Ladies': 4, 'Men': 5, 'Sports': 6}
x: <class 'numpy.ndarray'>
y: <class 'numpy.ndarray'>
x: (64, 224, 224, 3)
y: (64, 7)
# Build the model inside the MirroredStrategy scope so its variables are
# replicated across all visible GPUs.
with strategy.scope():
    model = VGG16()

# Persist the best weights (by validation accuracy) and stop training after
# five epochs without improvement.
checkpoint = ModelCheckpoint("checkpoint.h5", monitor='val_accuracy',
                             verbose=1, save_best_only=True,
                             save_weights_only=False, mode='auto')
early = EarlyStopping(monitor='val_accuracy', min_delta=0, patience=5,
                      verbose=1, mode='auto')

epochs = 15  # 100 seems to be enough to achieve >90% accuracies
history = model.fit(
    train_ds,
    validation_data=val_ds,
    validation_steps=5,
    epochs=epochs,
    callbacks=[checkpoint, early],
)
# Plot training curves: accuracy and loss for both splits on a single axis.
# Iteration order matches the legend entry order below.
for metric in ("accuracy", "val_accuracy", "loss", "val_loss"):
    plt.plot(history.history[metric])
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Accuracy", "Validation Accuracy", "loss", "Validation Loss"])
plt.show()
WARNING:tensorflow:Using MirroredStrategy eagerly has significant overhead currently. We will be working on improving this in the future, but for now please wrap `call_for_each_replica` or `experimental_run` or `experimental_run_v2` inside a tf.function to get the best performance.
Epoch 1/15
WARNING:tensorflow:From /home/mdl-ws/environments/env0/lib/python3.8/site-packages/tensorflow/python/data/ops/multi_device_iterator_ops.py:601: get_next_as_optional (from tensorflow.python.data.ops.iterator_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.data.Iterator.get_next_as_optional()` instead.
INFO:tensorflow:batch_all_reduce: 32 all-reduces with algorithm = nccl, num_packs = 1
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:batch_all_reduce: 32 all-reduces with algorithm = nccl, num_packs = 1
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
683/683 [==============================] - ETA: 0s - loss: 1.9180 - accuracy: 0.1900INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',).
Epoch 00001: val_accuracy improved from -inf to 0.15937, saving model to checkpoint.h5
683/683 [==============================] - 265s 389ms/step - loss: 1.9180 - accuracy: 0.1900 - val_loss: 1.9351 - val_accuracy: 0.1594
Epoch 2/15
683/683 [==============================] - ETA: 0s - loss: 1.9191 - accuracy: 0.1864
Epoch 00002: val_accuracy improved from 0.15937 to 0.21250, saving model to checkpoint.h5
683/683 [==============================] - 255s 373ms/step - loss: 1.9191 - accuracy: 0.1864 - val_loss: 1.8911 - val_accuracy: 0.2125
Epoch 3/15
683/683 [==============================] - ETA: 0s - loss: 1.9095 - accuracy: 0.1954
Epoch 00003: val_accuracy did not improve from 0.21250
683/683 [==============================] - 252s 369ms/step - loss: 1.9095 - accuracy: 0.1954 - val_loss: 1.9163 - val_accuracy: 0.1688
Epoch 4/15
683/683 [==============================] - ETA: 0s - loss: 1.8413 - accuracy: 0.2484
Epoch 00004: val_accuracy did not improve from 0.21250
683/683 [==============================] - 252s 369ms/step - loss: 1.8413 - accuracy: 0.2484 - val_loss: 1.9141 - val_accuracy: 0.2125
Epoch 5/15
683/683 [==============================] - ETA: 0s - loss: 1.9111 - accuracy: 0.1923
Epoch 00005: val_accuracy improved from 0.21250 to 0.39062, saving model to checkpoint.h5
683/683 [==============================] - 253s 371ms/step - loss: 1.9111 - accuracy: 0.1923 - val_loss: 1.5977 - val_accuracy: 0.3906
Epoch 6/15
683/683 [==============================] - ETA: 0s - loss: 1.6815 - accuracy: 0.3513
Epoch 00006: val_accuracy did not improve from 0.39062
683/683 [==============================] - 251s 368ms/step - loss: 1.6815 - accuracy: 0.3513 - val_loss: 1.4731 - val_accuracy: 0.3688
Epoch 7/15
683/683 [==============================] - ETA: 0s - loss: 1.4911 - accuracy: 0.4318
Epoch 00007: val_accuracy improved from 0.39062 to 0.43750, saving model to checkpoint.h5
683/683 [==============================] - 253s 370ms/step - loss: 1.4911 - accuracy: 0.4318 - val_loss: 1.5046 - val_accuracy: 0.4375
Epoch 8/15
683/683 [==============================] - ETA: 0s - loss: 1.2568 - accuracy: 0.5293
Epoch 00008: val_accuracy improved from 0.43750 to 0.54375, saving model to checkpoint.h5
683/683 [==============================] - 253s 371ms/step - loss: 1.2568 - accuracy: 0.5293 - val_loss: 1.0759 - val_accuracy: 0.5437
Epoch 9/15
683/683 [==============================] - ETA: 0s - loss: 0.9921 - accuracy: 0.6372
Epoch 00009: val_accuracy did not improve from 0.54375
683/683 [==============================] - 251s 367ms/step - loss: 0.9921 - accuracy: 0.6372 - val_loss: 1.2677 - val_accuracy: 0.4875
Epoch 10/15
683/683 [==============================] - ETA: 0s - loss: 0.7649 - accuracy: 0.7251
Epoch 00010: val_accuracy improved from 0.54375 to 0.63437, saving model to checkpoint.h5
683/683 [==============================] - 253s 370ms/step - loss: 0.7649 - accuracy: 0.7251 - val_loss: 1.0771 - val_accuracy: 0.6344
Epoch 11/15
683/683 [==============================] - ETA: 0s - loss: 0.5934 - accuracy: 0.7921
Epoch 00011: val_accuracy did not improve from 0.63437
683/683 [==============================] - 251s 368ms/step - loss: 0.5934 - accuracy: 0.7921 - val_loss: 1.1877 - val_accuracy: 0.6156
Epoch 12/15
683/683 [==============================] - ETA: 0s - loss: 0.4418 - accuracy: 0.8482
Epoch 00012: val_accuracy did not improve from 0.63437
683/683 [==============================] - 251s 368ms/step - loss: 0.4418 - accuracy: 0.8482 - val_loss: 1.5852 - val_accuracy: 0.6094
Epoch 13/15
683/683 [==============================] - ETA: 0s - loss: 0.3364 - accuracy: 0.8861
Epoch 00013: val_accuracy improved from 0.63437 to 0.73125, saving model to checkpoint.h5
683/683 [==============================] - 253s 371ms/step - loss: 0.3364 - accuracy: 0.8861 - val_loss: 1.2663 - val_accuracy: 0.7312
Epoch 14/15
683/683 [==============================] - ETA: 0s - loss: 0.2573 - accuracy: 0.9133
Epoch 00014: val_accuracy did not improve from 0.73125
683/683 [==============================] - 251s 368ms/step - loss: 0.2573 - accuracy: 0.9133 - val_loss: 1.2172 - val_accuracy: 0.6969
Epoch 15/15
683/683 [==============================] - ETA: 0s - loss: 0.2288 - accuracy: 0.9224
Epoch 00015: val_accuracy did not improve from 0.73125
683/683 [==============================] - 251s 368ms/step - loss: 0.2288 - accuracy: 0.9224 - val_loss: 1.3563 - val_accuracy: 0.7031
# Testing the generalisability of the Takashimaya Ngee Ann City trained model
# against images from Isetan Shaw Departmental store.
test_dir = pathlib.Path('Isetan45_testSet/')
# Recover the class-name ordering used at training time. NOTE(review): this
# assumes image_dataset_from_directory sorts class names the same way as
# ImageDataGenerator.class_indices (both alphabetical) — confirm if classes
# ever change.
classes_ds = tf.keras.preprocessing.image_dataset_from_directory('takaFrames_dataset/')
class_names = classes_ds.class_names
imgs_only = os.listdir(test_dir)  # redundant list-copy comprehension removed
for fname in imgs_only:
    img_path = os.path.join(test_dir, fname)
    print(img_path)
    # Bug fix: load_img's target_size is (height, width); the original passed
    # (img_width, img_height), which was harmless only while both are 224.
    pil_img = keras.preprocessing.image.load_img(
        img_path, target_size=(img_height, img_width))
    plt.imshow(pil_img)
    img_array = keras.preprocessing.image.img_to_array(pil_img)
    img_array = tf.expand_dims(img_array, 0)  # add batch dimension
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])  # model outputs logits
    plt.show()
    print("Likely {} with {:.2f}% confidence.\n\n".format(class_names[np.argmax(score)], 100 * np.max(score)))
Found 48545 files belonging to 7 classes. Isetan45_testSet/Cosmetics_3.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Accesories_4.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Men_1.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Children_4.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Cosmetics_2.JPG
Likely Cosmetics with 100.00% confidence. Isetan45_testSet/Household_1.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Household_3.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Sports_4.JPG
Likely Children with 100.00% confidence. Isetan45_testSet/Cosmetics_1.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Sports_1.JPG
Likely Cosmetics with 94.67% confidence. Isetan45_testSet/Men_5.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Ladies_2.JPG
Likely Cosmetics with 100.00% confidence. Isetan45_testSet/Children_3.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Accesories_2.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Children_5.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Sports_5.JPG
Likely Cosmetics with 100.00% confidence. Isetan45_testSet/Household_2.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Sports_2.JPG
Likely Cosmetics with 100.00% confidence. Isetan45_testSet/Cosmetics_4.JPG
Likely Cosmetics with 100.00% confidence. Isetan45_testSet/Ladies_4.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Accesories_3.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Household_5.JPG
Likely Household with 100.00% confidence. Isetan45_testSet/Cosmetics_5.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Children_1.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Children_2.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Household_4.JPG
Likely Ladies with 100.00% confidence. Isetan45_testSet/Ladies_3.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Sports_3.JPG
Likely Cosmetics with 100.00% confidence. Isetan45_testSet/Ladies_5.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Accesories_5.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Ladies_1.JPG
Likely Sports with 100.00% confidence. Isetan45_testSet/Men_3.JPG
Likely Cosmetics with 100.00% confidence. Isetan45_testSet/Men_4.JPG
Likely Cosmetics with 100.00% confidence. Isetan45_testSet/Accesories_1.JPG
Likely FashionAccessories with 100.00% confidence. Isetan45_testSet/Men_2.JPG
Likely Cosmetics with 100.00% confidence.